The overarching goal of this project is to determine the degree to which several message framing interventions might enhance message effectiveness and intentions, norms, and beliefs related to social distancing. Specifically, here we test the effect of humorous framing of health messages promoting social distancing behavior. We use two types of humorous framings:

  • mocking frame: humorously mocking or ridiculing individuals who might choose to not follow social distancing even though their circumstances might clearly allow them to socially distance themselves.
  • encouraging frame: humorous framing of the health message which does not overtly mock any individual or group.

Participants were randomly assigned to either a message framing intervention group (using encouraging or mocking humor), a control message group, or a group that saw no messages. Each participant in the intervention and message control groups saw a series of 5 messages about social distancing related to COVID-19, randomly sampled from a pool of 15 messages for pilot 2, which were previously normed for argument strength (M = 4.16, SD = 0.14, possible range = 1-5). Each message was created to look like an Instagram post that included a visual message about COVID-19 accompanied by a “post” about the message. The message control condition contained this stem only, whereas the humorous-framing conditions contained additional text framing the messages humorously (i.e., adding a joke, using the stem as premise). Participants then completed various outcome and individual differences measures.

Pretest

non-mocking

### Load pretest 1 (encouraging / non-mocking) ratings exported from Qualtrics
df <- read.csv('pretest1/COVID-19 humorous messages testing_March 26, 2020_20.08.csv')

### Extract variable name data (Qualtrics stores question text in the first two rows)
varnames <- df[1:2, ]
df <- df[-(1:2), ]

### Keep only participants who completed at least 90% of the survey
df$Progress <- as.numeric(as.character(df$Progress))
df <- df[df$Progress >= 90, ]

### Recode funniness/mockingness response labels into numeric 1-5 scores
recoding_cols <- colnames(df)[11:198]
df[recoding_cols] <- lapply(
  df[recoding_cols],
  function(responses) {
    recode(responses,
           `Not at all funny` = 1, `A bit funny` = 2, `Somewhat funny` = 3,
           `Very funny` = 4, `Extremely funny` = 5,
           `Not at all mocking` = 1, `A bit mocking` = 2, `Somewhat mocking` = 3,
           `Very mocking` = 4, `Extremely mocking` = 5)
  }
)

### Extract encouraging (non-mocking) messages
### Extract encouraging (non-mocking) messages, keeping only attentive raters
df_enc <- df %>%
  dplyr::select(!matches("^moc_")) %>%
  # each attention-check item has one correct response; keep rows that got all six right
  filter(enc_1.1_11 == 3, enc_1.2_11 == 1,
         enc_3.1_11 == 5, enc_3.2_11 == 2,
         enc_5.1_10 == 1, enc_5.2_10 == 3) %>%
  # drop the attention-check variables now that filtering is done
  dplyr::select(-enc_1.1_11, -enc_1.2_11, -enc_3.1_11,
                -enc_3.2_11, -enc_5.1_10, -enc_5.2_10)

### convert from wide to long form
### Convert from wide to long form: one row per subject x item, with separate
### columns for the funniness and mockingness ratings.
### (pivot_longer/pivot_wider replace the superseded gather/spread.)
df_enc <- df_enc %>%
  dplyr::select(matches("enc_")) %>%
  # assign subject IDs S01, S02, ... in row order
  mutate(SID = sprintf("S%02d", seq_len(nrow(.)))) %>%
  pivot_longer(-SID, names_to = "item", values_to = "score") %>%
  # item names look like enc_<block>.<question>_<msg>; split into components
  # (dot is now escaped — the original unescaped "." matched any character)
  extract(item, c("block", "question_type", "msg_number"),
          "enc_([1-5])\\.([1-2])_([0-9]+)", remove = TRUE) %>%
  mutate(item = sprintf("enc_%s_%s", block, msg_number),
         # question 1 = funniness rating, question 2 = mockingness rating
         question_type = ifelse(question_type == 1, "funniness",
                                ifelse(question_type == 2, "mocking", "other"))) %>%
  dplyr::select(SID, item, question_type, score) %>%
  pivot_wider(names_from = question_type, values_from = score)

### load item texts and merge with data frame
### Load the message item texts and merge them into the ratings data by item ID
item_texts <- read.csv('pretest1/encouraging_stim.csv') %>%
  dplyr::select(item, text)

df_enc <- left_join(df_enc, item_texts, by = "item")

We ran a short pre-test to test our intended manipulations of funniness and mockingness in the non-mocking humorous texts we designed. We recruited 32 participants to rate how funny (1 = Not at all funny, 5 = Extremely funny) and mocking (1 = Not at all mocking, 5 = Extremely mocking) these humorous texts were.

mocking

# Loading data and formatting it to make it analyzable
# Load pretest 2 (mocking) ratings exported from Qualtrics and format for analysis
df <- read.csv('pretest2/COVID-19 humorous messages testing (mocking only)_March 31, 2020_18.26.csv')

### Extract variable name data (Qualtrics stores question text in the first two rows)
varnames <- df[1:2, ]
df <- df[-(1:2), ]

### Keep only participants who completed at least 90% of the survey
df$Progress <- as.numeric(as.character(df$Progress))
df <- df[df$Progress >= 90, ]

### Recode funniness/mockingness response labels into numeric 1-5 scores
recoding_cols <- colnames(df)[11:64]
df[recoding_cols] <- lapply(
  df[recoding_cols],
  function(responses) {
    recode(responses,
           `Not at all funny` = 1, `A bit funny` = 2, `Somewhat funny` = 3,
           `Very funny` = 4, `Extremely funny` = 5,
           `Not at all mocking` = 1, `A bit mocking` = 2, `Somewhat mocking` = 3,
           `Very mocking` = 4, `Extremely mocking` = 5)
  }
)

### Keep only the response ID and the rating columns
df <- df[, c("ResponseId", recoding_cols)]

### Keep only participants who passed all four attention checks
passed_checks <- df$att_check_funniness == 4 & df$att_check_mocking == 3 &
                 df$att_check_funniness.1 == 1 & df$att_check_mocking.1 == 4
df <- df[passed_checks, ]

### Drop the attention-check columns now that filtering is done
df <- df[, !grepl("^att_check", colnames(df))]

### convert from wide to long form
### Convert from wide to long form: one row per participant x joke, with one
### column per rating type. (pivot_longer/pivot_wider replace the superseded
### gather/spread, and all_of() makes the external column vector explicit.)
list_items <- colnames(df)[2:51]
df <- df %>%
  pivot_longer(all_of(list_items), names_to = "item", values_to = "value") %>%
  # column names look like X<image>_<joke_number>_<rating_type>
  separate(item, c("image", "joke_number", "rating_type")) %>%
  unite("item", c("image", "joke_number")) %>%
  pivot_wider(names_from = rating_type, values_from = value)

### extracting list of joke texts
### Extract the joke texts from the Qualtrics question-text row.
### Each joke appears once per rating type, so keep the funniness copy only,
### then strip the boilerplate question stem to leave the joke text.
varnames <- varnames[1, list_items]
varnames <- varnames %>%
  pivot_longer(everything(), names_to = "item", values_to = "text") %>%
  # column names look like X<image>_<joke_number>_<rating_type>
  separate(item, c("image", "joke_number", "rating_type")) %>%
  unite("item", c("image", "joke_number")) %>%
  filter(rating_type == "funniness") %>%
  # question text is "<stem>  - Please indicate ... - <joke text>"
  separate(text, c("remove_this", "joke_text"),
           sep = "  - Please indicate how funny you find the following messages. - ") %>%
  # read.csv prefixed numeric column names with "X"; strip that prefix
  separate(item, c("remove_this2", "item"), sep = "X") %>%
  dplyr::select(-remove_this, -remove_this2, -rating_type)

### merge joke texts with the data
### Strip the "X" prefix that read.csv added to numeric column names, then
### merge the joke texts with the ratings (merge() = inner join on item)
df <- df %>%
  separate(item, c("x_prefix", "item"), sep = "X") %>%
  dplyr::select(-x_prefix)
df <- merge(df, varnames, by = "item")

We ran a short pre-test to test our intended manipulations of funniness and mockingness in the mocking humorous texts we designed. We recruited 37 participants to rate how funny (1 = Not at all funny, 5 = Extremely funny) and mocking (1 = Not at all mocking, 5 = Extremely mocking) these humorous texts were.

Selecting jokes based on ratings

Jokes in decreasing order of funniness

item joke_text funniness mocking
32_1 It’s really pretty simple. Stay at home as much as possible, and meet with co-workers online. That is right, you can now mute your boss, they won’t even know. 3.054 1.919
09_2 It’s not just about you getting infected, it’s about you infecting others #covid19. So unless you’re willing to trade your bathing suit for a hazmat suit this spring break, stay home. 2.703 2.054
20_1 We are staying home today to make a difference in our fight against the coronavirus. No one likes that one guy from work who is DYING to be at the office. No Gary, I am NOT interested in martini Wednesdays. 2.595 2.595
24_1 Avoiding even one social contact can have a huge impact on limiting the spread of #covid19. However some people will be irresponsible and stupid enough to attend social gatherings. Avoid them, avoid them like you avoid people with clipboard on streets. 2.568 2.568
31_1 Staying home helps stop the spread of #coronavirus. And yet some people will insist on NOT working from home. Why? Cause you miss passing by co-workers on the way to the bathroom? 2.568 2.459
31_2 Staying home helps stop the spread of #coronavirus. There will be more spring breaks, there will be more Coachellas. Trust me, there will be more Coachellas. 2.486 1.973
07_2 Avoid large gatherings and stay at home as much as possible #covid19. People bragging about avoiding social distancing online are the same people who would crowd left lanes on highways. 2.459 2.405
07_1 Avoid large gatherings, stay at home as much as possible #covid19 And don’t worry, you’re not missing out. This year at Coachella the audience will be a hologram. 2.432 1.811
23_1 Avoiding crowds and staying home as much as possible helps protect the vulnerable. Don’t travel and communicate the damn disease like a coronavirus traveling salesman. 2.405 2.189
09_1 It’s not just about you getting infected, it’s about you infecting others #covid19. Hanging out with friends right now is the moral equivalent of sneezing at an old lady and saying “peace out” 2.324 2.162
15_2 Social distancing is critical for reducing exposure to #covid19. Unless you are in a hazmat suit, you are definitely speading some virus unwittingly. And I am guessing these kids are not wearing a hazmat suit on their spring break. 2.324 2.73
10_2 Try to stay home and not be a contagion vector. Unless you want to infect all those at higher risk than you, who might actually die from #covid19, just because you couldn’t scratch the itch of getting a flaming Dr. Pepper shot at 2 in the afternoon on a Tuesday. 2.27 2.676
25_1 Thinking of going out for a cheeky last pint? Don’t. Uh-oh you are leaving the door. Darnit you can trasmit the disease! Don’t do it! And welp you have now shut the door behind you. We are all going to die. 2.27 2.405
06_1 Staying home protects our community by stopping the spread of #covid19. Not staying at home, and I am trying to put it delicately, does NOT stop the spread of virus, instead helps it. So please, put that pair of floral boxers down, leave the store and get back home. 2.243 2.486
12_2 Coronavirus is far more dangerous than the common flu. If you think this won’t affect you think again. Then if you still think this won’t affect you, think again again. 2.216 1.919
08_1 Practice social distancing and help slow the spread of coronavirus! Or you know, whatever, go infect everyone in crowded places like a sociopath. 2.189 2.459
15_1 Social distancing is critical for reducing exposure to #covid19. You must stay home to stop hundreds of others from being infected! Not staying home is the moral equivalent of coughing on an old lady’s face and then shrugging. 2.108 2.054
24_2 Avoiding even one social contact can have a huge impact on limiting the spread of #covid19. But yeah, going pub crawling with your friends is WAY more important of course. 2.108 2.865
10_1 Think of all the people who might get infected because of you if you venture out. Instead, if you really want to “go viral” stay home and make a YouTube video. 2.081 1.73
17_2 We all have a role to play in stopping the spread of #coronavirus, so stay home if you can. Too many have already died after getting infected from some special people trying to act too smart and trivializing the virus. These are the same special people who would wear headphones while jaywalking. 2.081 2.676
17_1 We all have a role to play in stopping the spread of #coronavirus, so stay home if you can. At this point, impulse adopting a DOG would be smarter than going out. 2.054 2
32_2 It’s really pretty simple. Stay at home as much as possible. Simple enough, right? Well, tell that to the people who are finding a global pandemic to be just convenient enough to make spring break plans. 2 2.459
18_1 Here is a message from doctors and nurses fighting on the front lines. Let’s stay home to support them in this fight against #coronavirus. The people who will still go out to crowded areas after reading this plea from doctors and nurses might finally answer the question: is it possible to be more reprehensible than an internet troll? 1.946 2.405
08_2 Practice social distancing and help slow the spread of coronavirus! hose who think they are too good for social distancing are the same people who don’t tip waiters at restaurants. 1.892 2.622
12_1 Coronavirus is far more dangerous than the common flu. Whoever told you that “it is just 0.5% it is not gonna kill us”, well even if it doesn’t kill them, it will kill 480,000 people just in the US. Whoever that jerk is, doesn’t deserve your attention anymore. Admit it, their social media was annoying to begin with. 1.892 2.351

Final set of jokes

image joke_text funniness mocking
32 It’s really pretty simple. Stay at home as much as possible, and meet with co-workers online. That is right, you can now mute your boss, they won’t even know. 3.054 1.919
9 It’s not just about you getting infected, it’s about you infecting others #covid19. So unless you’re willing to trade your bathing suit for a hazmat suit this spring break, stay home. 2.703 2.054
20 We are staying home today to make a difference in our fight against the coronavirus. No one likes that one guy from work who is DYING to be at the office. No Gary, I am NOT interested in martini Wednesdays. 2.595 2.595
24 Avoiding even one social contact can have a huge impact on limiting the spread of #covid19. However some people will be irresponsible and stupid enough to attend social gatherings. Avoid them, avoid them like you avoid people with clipboard on streets. 2.568 2.568
31 Staying home helps stop the spread of #coronavirus. And yet some people will insist on NOT working from home. Why? Cause you miss passing by co-workers on the way to the bathroom? 2.568 2.459
7 Avoid large gatherings and stay at home as much as possible #covid19. People bragging about avoiding social distancing online are the same people who would crowd left lanes on highways. 2.459 2.405
23 Avoiding crowds and staying home as much as possible helps protect the vulnerable. Don’t travel and communicate the damn disease like a coronavirus traveling salesman. 2.405 2.189
15 Social distancing is critical for reducing exposure to #covid19. Unless you are in a hazmat suit, you are definitely speading some virus unwittingly. And I am guessing these kids are not wearing a hazmat suit on their spring break. 2.324 2.73
10 Try to stay home and not be a contagion vector. Unless you want to infect all those at higher risk than you, who might actually die from #covid19, just because you couldn’t scratch the itch of getting a flaming Dr. Pepper shot at 2 in the afternoon on a Tuesday. 2.27 2.676
25 Thinking of going out for a cheeky last pint? Don’t. Uh-oh you are leaving the door. Darnit you can trasmit the disease! Don’t do it! And welp you have now shut the door behind you. We are all going to die. 2.27 2.405
6 Staying home protects our community by stopping the spread of #covid19. Not staying at home, and I am trying to put it delicately, does NOT stop the spread of virus, instead helps it. So please, put that pair of floral boxers down, leave the store and get back home. 2.243 2.486
12 Coronavirus is far more dangerous than the common flu. If you think this won’t affect you think again. Then if you still think this won’t affect you, think again again. 2.216 1.919
8 Practice social distancing and help slow the spread of coronavirus! Or you know, whatever, go infect everyone in crowded places like a sociopath. 2.189 2.459
17 We all have a role to play in stopping the spread of #coronavirus, so stay home if you can. Too many have already died after getting infected from some special people trying to act too smart and trivializing the virus. These are the same special people who would wear headphones while jaywalking. 2.081 2.676
18 Here is a message from doctors and nurses fighting on the front lines. Let’s stay home to support them in this fight against #coronavirus. The people who will still go out to crowded areas after reading this plea from doctors and nurses might finally answer the question: is it possible to be more reprehensible than an internet troll? 1.946 2.405

Pilot 1

Here, we test the effect of “encouraging” humor versus message control, which were non-humorous. Encouraging humor refers to humorous framing of health messages such that these messages were not overtly mocking any person or group of people.

## tidy data for analysis

## Tidy pilot 1 data for analysis

data <- data_pilot1

### message-level ratings: reverse-score the reverse-worded cognition items
### and compute message favorability (positive minus negative rating)
messages <- data %>%
  filter(condition %in% c("message control", "encouraging"),
         grepl("msg", survey_name)) %>%
  mutate(value = as.numeric(value),
         # reverse-score items cognition_2/4/6 on the 1-5 scale
         value = ifelse(item == "cognition_2" | item == "cognition_4" |
                          item == "cognition_6",
                        abs(6 - value), value)) %>%
  extract(item, "item", "msg_.*_(.*)") %>%
  spread(survey_name, value) %>%
  mutate(msg_favorability = msg_positive - msg_negative) %>%
  dplyr::select(-msg_negative, -msg_positive) %>%
  gather(survey_name, value, contains("msg")) %>%
  mutate(item = sprintf("%s_%s", survey_name, item))

### person-level variables (cognition, intentions, norms, beliefs, politics,
### demographics) combined with the message-level ratings
data_tidy <- data %>%
  filter(condition %in% c("message control", "encouraging")) %>%
  mutate(condition = str_replace(condition, "-paired|-unpaired", "")) %>%
  # NOTE: the original pattern contained "beliefs|beliefs"; duplicate removed
  filter(grepl("cognition|intentions|norms_close|norms_town|beliefs|politics_party|politics_conserv|^age$|gender", survey_name)) %>%
  mutate(value = as.numeric(value)) %>%
  bind_rows(messages)

### subject-level covariates: state (factor), gender (labeled), and
### mean-centered age. pivot_wider replaces the superseded spread();
### distinct() replaces unique() for row de-duplication.
control_vars <- data %>%
  filter(grepl("state|gender|^age$", survey_name)) %>%
  dplyr::select(condition, SID, survey_name, value) %>%
  distinct() %>%
  pivot_wider(names_from = survey_name, values_from = value) %>%
  mutate(state = as.factor(state),
         gender = recode(gender, "1" = "male", "2" = "female", "3" = "other",
                         "4" = "prefer not to say"),
         # center age at the sample mean; keeps the original units
         age = scale(as.numeric(age), center = TRUE, scale = FALSE))

### person-level means: average each subject's responses within each survey,
### keeping only the item families listed below
person_item_patterns <- c(
  "msg", "cognition", "beliefs",
  "intentions1_2", "intentions1_4", "intentions1_6", "intentions1_10",
  "norms_close1_2", "norms_close1_4", "norms_close1_6", "norms_close1_10",
  "norms_town1_2", "norms_town1_4", "norms_town1_6", "norms_town1_10",
  "politics_party", "politics_conserv"
)
data_person <- data_tidy %>%
  filter(grepl(paste(person_item_patterns, collapse = "|"), item)) %>%
  group_by(condition, SID, survey_name) %>%
  summarize(value = mean(value, na.rm = TRUE))

number of subjects per condition

visualize condition effects

In this section, we summarize condition effects on message ratings and other subject-level DVs and covariates.

person-level

A summary of condition effects on message ratings and other DVs/covariates at person level.

other DVs and covariates

survey level

A summary of condition effects on each survey, aggregated across survey items. ###### intentions, beliefs, cognition

message effects

In this section, we summarize the effect of messages on message level ratings. These summaries are presented as: 1) collapsed over conditions, and 2) split by condition.

  • dotted line = mean rating across all conditions and messages

by condition

NOTE: These plots are substantively the same as the ones presented in “visualize condition effects > message-level” sub-section. These plots are presented here for ease of comparison with the message effects collapsed across conditions.

run models

standard

Models = lmer(DV ~ condition + (1 | SID) + (1 | message), data = data_mod)

  • Betas are standardized regression coefficients

self motivation

  • message-level
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: msg_motiv_self ~ condition + (1 | SID) + (1 | message)
##    Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 381
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4587 -0.3877  0.1177  0.6364  2.1968 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 0.5036   0.7097  
##  message  (Intercept) 0.0000   0.0000  
##  Residual             0.5189   0.7204  
## Number of obs: 150, groups:  SID, 30; message, 15
## 
## Fixed effects:
##                      Estimate Std. Error       df t value Pr(>|t|)
## (Intercept)          -0.09288    0.18902 28.00000  -0.491    0.627
## conditionencouraging  0.21433    0.28715 28.00000   0.746    0.462
## 
## Correlation of Fixed Effects:
##             (Intr)
## cndtnncrgng -0.658
## convergence code: 0
## boundary (singular) fit: see ?isSingular

social motivation

  • message-level
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: msg_motiv_other ~ condition + (1 | SID) + (1 | message)
##    Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 381
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2090 -0.4491  0.1493  0.5874  1.9092 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 0.515756 0.71816 
##  message  (Intercept) 0.001255 0.03542 
##  Residual             0.515445 0.71795 
## Number of obs: 150, groups:  SID, 30; message, 15
## 
## Fixed effects:
##                      Estimate Std. Error       df t value Pr(>|t|)
## (Intercept)           0.04142    0.19102 27.49515   0.217    0.830
## conditionencouraging -0.09573    0.28985 27.98712  -0.330    0.744
## 
## Correlation of Fixed Effects:
##             (Intr)
## cndtnncrgng -0.658

sharing

  • message-level
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: msg_share ~ condition + (1 | SID) + (1 | message)
##    Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 373.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.4412 -0.4252 -0.0218  0.6095  2.4295 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 0.547247 0.73976 
##  message  (Intercept) 0.006904 0.08309 
##  Residual             0.473235 0.68792 
## Number of obs: 150, groups:  SID, 30; message, 15
## 
## Fixed effects:
##                      Estimate Std. Error       df t value Pr(>|t|)
## (Intercept)          -0.09035    0.19552 27.84504  -0.462    0.648
## conditionencouraging  0.20767    0.29525 27.95131   0.703    0.488
## 
## Correlation of Fixed Effects:
##             (Intr)
## cndtnncrgng -0.654

social relevance

  • person-level
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: msg_rel_social ~ condition + (1 | SID) + (1 | message)
##    Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 377.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.8216 -0.4579  0.1459  0.4840  1.9404 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 0.51007  0.7142  
##  message  (Intercept) 0.06459  0.2542  
##  Residual             0.45905  0.6775  
## Number of obs: 150, groups:  SID, 30; message, 15
## 
## Fixed effects:
##                      Estimate Std. Error       df t value Pr(>|t|)
## (Intercept)          -0.02791    0.19937 31.87066  -0.140    0.890
## conditionencouraging  0.04483    0.28614 27.72210   0.157    0.877
## 
## Correlation of Fixed Effects:
##             (Intr)
## cndtnncrgng -0.621

intentions

  • person-level
## 
## Call:
## lm(formula = intentions1 ~ condition, data = data_mod_person)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.2845 -0.5676  0.2444  0.6670  1.1476 
## 
## Coefficients:
##                          Estimate Std. Error t value Pr(>|t|)
## (Intercept)                0.2723     0.2739   0.994    0.329
## conditionmessage control  -0.4806     0.3638  -1.321    0.197
## 
## Residual standard error: 0.9874 on 28 degrees of freedom
## Multiple R-squared:  0.05867,    Adjusted R-squared:  0.02505 
## F-statistic: 1.745 on 1 and 28 DF,  p-value: 0.1972

norm (close)

  • person-level
## 
## Call:
## lm(formula = norms_close1 ~ condition, data = data_mod_person)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.14328 -0.48744  0.01678  0.61092  1.96115 
## 
## Coefficients:
##                          Estimate Std. Error t value Pr(>|t|)
## (Intercept)               0.08179    0.28151   0.291    0.774
## conditionmessage control -0.14434    0.37397  -0.386    0.702
## 
## Residual standard error: 1.015 on 28 degrees of freedom
## Multiple R-squared:  0.005292,   Adjusted R-squared:  -0.03023 
## F-statistic: 0.149 on 1 and 28 DF,  p-value: 0.7024

norm (city/town)

  • person-level
## 
## Call:
## lm(formula = norms_town1 ~ condition, data = data_mod_person)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.8742 -0.4444 -0.1517  0.3836  2.7403 
## 
## Coefficients:
##                          Estimate Std. Error t value Pr(>|t|)
## (Intercept)                0.3322     0.2697   1.232    0.228
## conditionmessage control  -0.5862     0.3582  -1.636    0.113
## 
## Residual standard error: 0.9723 on 28 degrees of freedom
## Multiple R-squared:  0.08729,    Adjusted R-squared:  0.0547 
## F-statistic: 2.678 on 1 and 28 DF,  p-value: 0.1129

beliefs

  • person-level
## 
## Call:
## lm(formula = beliefs ~ condition, data = data_mod_person)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -0.4426 -0.3871 -0.1832 -0.0804  5.0448 
## 
## Coefficients:
##                          Estimate Std. Error t value Pr(>|t|)
## (Intercept)               -0.1218     0.2806  -0.434    0.668
## conditionmessage control   0.2149     0.3728   0.577    0.569
## 
## Residual standard error: 1.012 on 28 degrees of freedom
## Multiple R-squared:  0.01173,    Adjusted R-squared:  -0.02356 
## F-statistic: 0.3324 on 1 and 28 DF,  p-value: 0.5689

moderation by need for cognition

Models = lmer(DV ~ condition * cognition + (1 | SID) + (1 | message), data = data_mod)

  • Betas are standardized regression coefficients

motivation self

  • message-level
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: 
## msg_motiv_self ~ condition * cognition + (1 | SID) + (1 | message)
##    Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 382.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4315 -0.3855  0.1864  0.6029  2.1915 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 0.5278   0.7265  
##  message  (Intercept) 0.0000   0.0000  
##  Residual             0.5189   0.7204  
## Number of obs: 150, groups:  SID, 30; message, 15
## 
## Fixed effects:
##                                Estimate Std. Error       df t value
## (Intercept)                    -0.10914    0.20902 26.00000  -0.522
## conditionencouraging            0.34173    0.32586 26.00000   1.049
## cognition                       0.04412    0.21936 26.00000   0.201
## conditionencouraging:cognition  0.18648    0.32865 26.00000   0.567
##                                Pr(>|t|)
## (Intercept)                       0.606
## conditionencouraging              0.304
## cognition                         0.842
## conditionencouraging:cognition    0.575
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnn cogntn
## cndtnncrgng -0.641              
## cognition   -0.387  0.248       
## cndtnncrgn:  0.258  0.104 -0.667
## convergence code: 0
## boundary (singular) fit: see ?isSingular

motivation other

  • message-level
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: 
## msg_motiv_other ~ condition * cognition + (1 | SID) + (1 | message)
##    Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 382.7
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.1992 -0.4261  0.1823  0.5628  1.8936 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 0.552251 0.74314 
##  message  (Intercept) 0.001565 0.03956 
##  Residual             0.515141 0.71773 
## Number of obs: 150, groups:  SID, 30; message, 15
## 
## Fixed effects:
##                                Estimate Std. Error       df t value
## (Intercept)                     0.04770    0.21316 25.77347   0.224
## conditionencouraging           -0.02268    0.33194 25.98862  -0.068
## cognition                      -0.01703    0.22347 25.99936  -0.076
## conditionencouraging:cognition  0.18173    0.33482 26.00220   0.543
##                                Pr(>|t|)
## (Intercept)                       0.825
## conditionencouraging              0.946
## cognition                         0.940
## conditionencouraging:cognition    0.592
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnn cogntn
## cndtnncrgng -0.641              
## cognition   -0.386  0.248       
## cndtnncrgn:  0.258  0.104 -0.668

sharing

  • message-level
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: msg_share ~ condition * cognition + (1 | SID) + (1 | message)
##    Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 369.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3494 -0.4411  0.0216  0.5789  2.4843 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 0.45009  0.67089 
##  message  (Intercept) 0.00609  0.07804 
##  Residual             0.47403  0.68850 
## Number of obs: 150, groups:  SID, 30; message, 15
## 
## Fixed effects:
##                                Estimate Std. Error      df t value
## (Intercept)                     -0.2164     0.1952 25.7716  -1.108
## conditionencouraging             0.5570     0.3028 25.9376   1.840
## cognition                        0.3420     0.2039 25.9582   1.678
## conditionencouraging:cognition   0.1212     0.3055 25.9644   0.397
##                                Pr(>|t|)  
## (Intercept)                      0.2780  
## conditionencouraging             0.0773 .
## cognition                        0.1054  
## conditionencouraging:cognition   0.6949  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnn cogntn
## cndtnncrgng -0.638              
## cognition   -0.385  0.248       
## cndtnncrgn:  0.257  0.104 -0.668

social relevance

  • person-level
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: 
## msg_rel_social ~ condition * cognition + (1 | SID) + (1 | message)
##    Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 378.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.7972 -0.4354  0.1419  0.4982  1.9357 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 0.54048  0.7352  
##  message  (Intercept) 0.06647  0.2578  
##  Residual             0.45819  0.6769  
## Number of obs: 150, groups:  SID, 30; message, 15
## 
## Fixed effects:
##                                 Estimate Std. Error        df t value
## (Intercept)                     0.005875   0.219617 29.674479   0.027
## conditionencouraging            0.093705   0.326471 25.776149   0.287
## cognition                      -0.092437   0.220158 25.961652  -0.420
## conditionencouraging:cognition  0.263864   0.330056 26.026216   0.799
##                                Pr(>|t|)
## (Intercept)                       0.979
## conditionencouraging              0.776
## cognition                         0.678
## conditionencouraging:cognition    0.431
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnn cogntn
## cndtnncrgng -0.611              
## cognition   -0.369  0.248       
## cndtnncrgn:  0.247  0.104 -0.669

intentions

  • person-level
## 
## Call:
## lm(formula = intentions1 ~ condition * cognition, data = data_mod_person)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.3060 -0.5791  0.2138  0.6747  1.2173 
## 
## Coefficients:
##                                    Estimate Std. Error t value Pr(>|t|)
## (Intercept)                         0.26133    0.32212   0.811    0.425
## conditionmessage control           -0.48706    0.41987  -1.160    0.257
## cognition                          -0.02284    0.31534  -0.072    0.943
## conditionmessage control:cognition  0.07023    0.42347   0.166    0.870
## 
## Residual standard error: 1.024 on 26 degrees of freedom
## Multiple R-squared:  0.05988,    Adjusted R-squared:  -0.0486 
## F-statistic: 0.552 on 3 and 26 DF,  p-value: 0.6513

mediation via norms

mod1 = lm(norm_close ~ condition, data = data_mod_person)

mod2 = lm(DV ~ norm_close + condition, data = data_mod_person)

mediation_mod = mediate(mod1, mod2, sims = 1000, treat = "condition", mediator = "norm_close")

  • Betas are standardized regression coefficients
  • person-level

motivation self

  • message-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME            -0.0452      -0.3432         0.20    0.71
## ADE             -0.2254      -0.9471         0.47    0.54
## Total Effect    -0.2706      -1.0075         0.49    0.50
## Prop. Mediated   0.0746      -2.1455         1.94    0.72
## 
## Sample Size Used: 30 
## 
## 
## Simulations: 1000

motivation other

  • message-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME            -0.0214      -0.2590         0.13    0.85
## ADE              0.1447      -0.6182         0.98    0.71
## Total Effect     0.1233      -0.6391         0.96    0.74
## Prop. Mediated   0.0030      -2.5993         1.94    0.96
## 
## Sample Size Used: 30 
## 
## 
## Simulations: 1000

sharing

  • message-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME            -0.0375      -0.3151         0.18    0.76
## ADE             -0.2398      -0.9294         0.38    0.52
## Total Effect    -0.2773      -1.0077         0.38    0.46
## Prop. Mediated   0.0583      -2.0969         2.25    0.74
## 
## Sample Size Used: 30 
## 
## 
## Simulations: 1000

Relevance social

  • person-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME           -0.03674     -0.34612         0.23    0.79
## ADE            -0.00685     -0.71579         0.64    1.00
## Total Effect   -0.04359     -0.81228         0.68    0.94
## Prop. Mediated  0.04590     -3.33932         2.84    0.83
## 
## Sample Size Used: 30 
## 
## 
## Simulations: 1000

intentions

  • person-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME            -0.0476      -0.3694         0.23    0.71
## ADE             -0.4132      -1.0933         0.23    0.25
## Total Effect    -0.4608      -1.2055         0.24    0.24
## Prop. Mediated   0.1014      -1.6638         1.94    0.68
## 
## Sample Size Used: 30 
## 
## 
## Simulations: 1000

Pilot 2

Here, we test the effect of “encouraging” and “mocking” humor versus message control, which were non-humorous. Mocking humor refers to humorous framing of health messages such that these messages were ridiculing individuals who would choose to not follow COVID-19 related preventative measures (like social distancing), even though their circumstances allow them to follow those measures. In contrast, encouraging messages used humorous framings which did not overtly mock or ridicule any person or group of people. In this study, we used the following sets of stimuli:

  • mocking-paired, encouraging-paired: 6 messages in each condition, such that each message in mocking condition shares the first sentence (or message control) with one message in the encouraging condition
  • mocking-unpaired: 6 messages which are not paired with encouraging condition
  • encouraging-unpaired: 5 messages which are not paired with mocking condition

In this analysis, we combine mocking-paired and mocking-unpaired into “mocking” condition, and similarly, we combine encouraging-paired and encouraging-unpaired into “encouraging” condition.

## tidy data for analysis

# Work on a local copy of the pilot 2 data (use `<-` for assignment per R style)
data <- data_pilot2

# Message-level ratings: keep experimental conditions, collapse the
# paired/unpaired distinction, reverse-code the reverse-keyed cognition items,
# and compute per-message favorability (positive minus negative rating).
# NOTE: pivot_wider()/pivot_longer() replace the superseded spread()/gather().
reverse_coded_items <- c("cognition_2", "cognition_4", "cognition_6")

messages <- data %>%
  filter(condition %in% c("message control", "encouraging-unpaired", "encouraging-paired", 
                          "mocking-unpaired", "mocking-paired")) %>%
  # collapse "-paired"/"-unpaired" variants into "mocking" / "encouraging"
  mutate(condition = str_replace(condition, "-paired|-unpaired", "")) %>%
  filter(grepl("msg", survey_name)) %>%
  mutate(value = as.numeric(value),
         # reverse-code on the 1-5 scale (6 - value); one ifelse() replaces
         # the three per-item statements in the original
         value = ifelse(item %in% reverse_coded_items, abs(6 - value), value)) %>% 
  extract(item, "item", "msg_.*_(.*)") %>%
  pivot_wider(names_from = survey_name, values_from = value) %>%
  mutate(msg_favorability = msg_positive - msg_negative) %>%
  dplyr::select(-msg_negative, -msg_positive) %>%
  pivot_longer(contains("msg"), names_to = "survey_name", values_to = "value") %>%
  mutate(item = sprintf("%s_%s", survey_name, item))

# Person-level survey responses (cognition, intentions, norms, beliefs,
# politics, demographics), stacked with the tidied message-level ratings.
experimental_conditions <- c("message control", "encouraging-unpaired", "encouraging-paired", 
                             "mocking-unpaired", "mocking-paired")
person_survey_pattern <- "cognition|intentions|norms_close|norms_town|beliefs_safe|beliefs_norms|politics_party|politics_conserv|^age$|gender"

data_tidy <- data %>%
  filter(condition %in% experimental_conditions) %>%
  # collapse paired/unpaired into the two humor conditions
  mutate(condition = str_replace(condition, "-paired|-unpaired", "")) %>%
  filter(grepl(person_survey_pattern, survey_name)) %>%
  mutate(value = as.numeric(value)) %>% 
  bind_rows(messages)

# Person-level covariates: state (factor), gender (recoded labels), and
# mean-centered age. pivot_wider() replaces the superseded spread().
control_vars <- data %>%
  filter(grepl("state|gender|^age$", survey_name)) %>%
  dplyr::select(condition, SID, survey_name, value) %>%
  unique() %>%
  pivot_wider(names_from = survey_name, values_from = value) %>%
  mutate(state = as.factor(state),
         gender = recode(gender, "1" = "male", "2" = "female", "3" = "other", "4" = "prefer not to say"),
         # scale() returns a 1-column matrix; as.numeric() keeps age a plain
         # numeric vector so the column behaves normally downstream
         age = as.numeric(scale(as.numeric(age), center = TRUE, scale = FALSE)))

# Average each participant's responses within survey, by condition.
# .groups = "drop" returns an ungrouped result (the original left the frame
# grouped by condition and SID — a common dplyr footgun).
data_person <- data_tidy %>%
  filter(grepl("msg|cognition|beliefs|intentions1_2|intentions1_4|intentions1_6|intentions1_10|norms_close1_2|norms_close1_4|norms_close1_6|norms_close1_10|norms_town1_2|norms_town1_4|norms_town1_6|norms_town1_10|politics_party|politics_conserv", item)) %>%
  group_by(condition, SID, survey_name) %>%
  summarize(value = mean(value, na.rm = TRUE), .groups = "drop")

number of subjects per condition

visualize condition effects

In this section, we summarize condition effects on message ratings and other subject-level DVs and covariates.

person-level

A summary of condition effects on message ratings and other DVs/covariates at person level.

other DVs and covariates

survey level

A summary of condition effects on each survey, aggregated across survey items. ###### intentions, beliefs, cognition

message effects

In this section, we summarize the effect of messages on message level ratings. These summaries are presented as: 1) collapsed over conditions, and 2) split by condition.

  • dotted line = mean rating across all conditions and messages

by condition

NOTE: These plots are substantively the same as the ones presented in “visualize condition effects > message-level” sub-section. These plots are presented here for ease of comparison with the message effects collapsed across conditions.

run models

standard

Models = lmer(DV ~ condition + (1 | SID) + (1 | message), data = data_mod)

  • Betas are standardized regression coefficients

self motivation

  • message-level
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: msg_motiv_self ~ condition + (1 | SID) + (1 | message)
##    Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 929.5
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.5320 -0.3285  0.1324  0.3684  2.8713 
## 
## Random effects:
##  Groups   Name        Variance  Std.Dev.
##  SID      (Intercept) 0.4609632 0.67894 
##  message  (Intercept) 0.0001903 0.01379 
##  Residual             0.5126932 0.71603 
## Number of obs: 369, groups:  SID, 74; message, 15
## 
## Fixed effects:
##                      Estimate Std. Error      df t value Pr(>|t|)  
## (Intercept)            0.1759     0.1372 70.0300   1.282   0.2041  
## conditionencouraging  -0.1304     0.2168 70.2302  -0.602   0.5494  
## conditionmocking      -0.4119     0.2057 70.0662  -2.002   0.0491 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnn
## cndtnncrgng -0.632       
## condtnmckng -0.667  0.422

social motivation

  • message-level
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: msg_motiv_other ~ condition + (1 | SID) + (1 | message)
##    Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 940.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2102 -0.3525  0.0931  0.4454  3.0318 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 0.468818 0.6847  
##  message  (Intercept) 0.009585 0.0979  
##  Residual             0.522969 0.7232  
## Number of obs: 369, groups:  SID, 74; message, 15
## 
## Fixed effects:
##                      Estimate Std. Error      df t value Pr(>|t|)  
## (Intercept)            0.1703     0.1407 70.2959   1.210   0.2302  
## conditionencouraging  -0.1351     0.2214 71.9276  -0.610   0.5438  
## conditionmocking      -0.3831     0.2104 71.7981  -1.821   0.0727 .
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnn
## cndtnncrgng -0.616       
## condtnmckng -0.649  0.437

sharing

  • message-level
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: msg_share ~ condition + (1 | SID) + (1 | message)
##    Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 772.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.6347 -0.4390  0.0318  0.4039  3.4686 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 0.750362 0.86623 
##  message  (Intercept) 0.004429 0.06655 
##  Residual             0.273089 0.52258 
## Number of obs: 369, groups:  SID, 74; message, 15
## 
## Fixed effects:
##                       Estimate Std. Error        df t value Pr(>|t|)
## (Intercept)           0.009405   0.164777 71.926070   0.057    0.955
## conditionencouraging -0.236003   0.260131 71.867816  -0.907    0.367
## conditionmocking      0.142945   0.246901 71.930941   0.579    0.564
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnn
## cndtnncrgng -0.627       
## condtnmckng -0.661  0.427

Relevance social

  • person-level
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: msg_rel_social ~ condition + (1 | SID) + (1 | message)
##    Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 951.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.3538 -0.1671  0.1070  0.3229  2.4539 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 0.413963 0.64340 
##  message  (Intercept) 0.004922 0.07016 
##  Residual             0.557197 0.74646 
## Number of obs: 369, groups:  SID, 74; message, 15
## 
## Fixed effects:
##                      Estimate Std. Error       df t value Pr(>|t|)  
## (Intercept)           0.12525    0.13372 70.31167   0.937   0.3521  
## conditionencouraging  0.08531    0.21090 71.17697   0.405   0.6871  
## conditionmocking     -0.47577    0.20026 70.97368  -2.376   0.0202 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnn
## cndtnncrgng -0.623       
## condtnmckng -0.656  0.431

intentions

  • person-level
## 
## Call:
## lm(formula = intentions1 ~ condition, data = data_mod_person)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -3.1541 -0.3342  0.3655  0.7311  0.9399 
## 
## Coefficients:
##                          Estimate Std. Error t value Pr(>|t|)
## (Intercept)              -0.07621    0.22575  -0.338    0.737
## conditionmessage control  0.02089    0.29145   0.072    0.943
## conditionmocking          0.20888    0.30567   0.683    0.497
## 
## Residual standard error: 1.01 on 71 degrees of freedom
## Multiple R-squared:  0.008636,   Adjusted R-squared:  -0.01929 
## F-statistic: 0.3092 on 2 and 71 DF,  p-value: 0.735

norm (close)

  • person-level
## 
## Call:
## lm(formula = norms_close1 ~ condition, data = data_mod_person)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.8502 -0.5378  0.2473  0.6539  1.3002 
## 
## Coefficients:
##                          Estimate Std. Error t value Pr(>|t|)   
## (Intercept)               -0.4682     0.2143  -2.185  0.03221 * 
## conditionmessage control   0.4793     0.2767   1.732  0.08755 . 
## conditionmocking           0.8445     0.2902   2.910  0.00482 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9584 on 71 degrees of freedom
## Multiple R-squared:  0.1067, Adjusted R-squared:  0.08149 
## F-statistic: 4.238 on 2 and 71 DF,  p-value: 0.01824

norm (city/town)

  • person-level
## 
## Call:
## lm(formula = norms_town1 ~ condition, data = data_mod_person)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.49909 -0.44123  0.00739  0.69101  1.83882 
## 
## Coefficients:
##                          Estimate Std. Error t value Pr(>|t|)   
## (Intercept)               -0.4389     0.2135  -2.056   0.0435 * 
## conditionmessage control   0.3906     0.2756   1.417   0.1608   
## conditionmocking           0.8650     0.2891   2.993   0.0038 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9547 on 71 degrees of freedom
## Multiple R-squared:  0.1134, Adjusted R-squared:  0.08847 
## F-statistic: 4.542 on 2 and 71 DF,  p-value: 0.01392

moderation by need for cognition

Models = lmer(DV ~ condition * cognition + (1 | SID) + (1 | message), data = data_mod)

  • Betas are standardized regression coefficients

motivation self

  • message-level
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: 
## msg_motiv_self ~ condition * cognition + (1 | SID) + (1 | message)
##    Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 934.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.5547 -0.3165  0.1174  0.3692  2.8569 
## 
## Random effects:
##  Groups   Name        Variance  Std.Dev.
##  SID      (Intercept) 0.4815259 0.6939  
##  message  (Intercept) 0.0001664 0.0129  
##  Residual             0.5127043 0.7160  
## Number of obs: 369, groups:  SID, 74; message, 15
## 
## Fixed effects:
##                                Estimate Std. Error       df t value
## (Intercept)                     0.17615    0.13967 67.16753   1.261
## conditionencouraging           -0.13031    0.22081 67.33454  -0.590
## conditionmocking               -0.41153    0.20942 67.19324  -1.965
## cognition                      -0.04103    0.12904 67.98831  -0.318
## conditionencouraging:cognition  0.05924    0.28203 67.89793   0.210
## conditionmocking:cognition     -0.04964    0.19256 67.92560  -0.258
##                                Pr(>|t|)  
## (Intercept)                      0.2116  
## conditionencouraging             0.5571  
## conditionmocking                 0.0535 .
## cognition                        0.7515  
## conditionencouraging:cognition   0.8343  
## conditionmocking:cognition       0.7973  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnn cndtnm cogntn cndtnn:
## cndtnncrgng -0.632                             
## condtnmckng -0.667  0.422                      
## cognition   -0.008  0.005  0.005               
## cndtnncrgn:  0.003  0.020 -0.002 -0.458        
## cndtnmckng:  0.005 -0.003 -0.007 -0.670  0.307

motivation other

  • message-level
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: 
## msg_motiv_other ~ condition * cognition + (1 | SID) + (1 | message)
##    Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 945.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.1917 -0.3495  0.0940  0.4510  3.0505 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 0.490205 0.70015 
##  message  (Intercept) 0.009629 0.09813 
##  Residual             0.522938 0.72314 
## Number of obs: 369, groups:  SID, 74; message, 15
## 
## Fixed effects:
##                                 Estimate Std. Error        df t value
## (Intercept)                     0.170068   0.143233 67.676649   1.187
## conditionencouraging           -0.131674   0.225491 69.008253  -0.584
## conditionmocking               -0.383273   0.214147 68.922520  -1.790
## cognition                       0.027697   0.130310 68.009492   0.213
## conditionencouraging:cognition  0.120972   0.284652 67.789525   0.425
## conditionmocking:cognition      0.008489   0.194376 67.856866   0.044
##                                Pr(>|t|)  
## (Intercept)                      0.2392  
## conditionencouraging             0.5612  
## conditionmocking                 0.0779 .
## cognition                        0.8323  
## conditionencouraging:cognition   0.6722  
## conditionmocking:cognition       0.9653  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnn cndtnm cogntn cndtnn:
## cndtnncrgng -0.617                             
## condtnmckng -0.649  0.436                      
## cognition   -0.007  0.001  0.001               
## cndtnncrgn:  0.003  0.022 -0.001 -0.458        
## cndtnmckng:  0.005 -0.001 -0.004 -0.670  0.307

sharing

  • message-level
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: msg_share ~ condition * cognition + (1 | SID) + (1 | message)
##    Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 772.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.6763 -0.4308  0.0295  0.4066  3.5012 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 0.731859 0.8555  
##  message  (Intercept) 0.004356 0.0660  
##  Residual             0.273130 0.5226  
## Number of obs: 369, groups:  SID, 74; message, 15
## 
## Fixed effects:
##                                 Estimate Std. Error        df t value
## (Intercept)                     0.007055   0.162886 68.889162   0.043
## conditionencouraging           -0.226277   0.257219 68.832217  -0.880
## conditionmocking                0.144757   0.244063 68.892727   0.593
## cognition                       0.266749   0.149740 68.051775   1.781
## conditionencouraging:cognition  0.089839   0.327275 67.964231   0.275
## conditionmocking:cognition     -0.265923   0.223444 67.990876  -1.190
##                                Pr(>|t|)  
## (Intercept)                      0.9656  
## conditionencouraging             0.3821  
## conditionmocking                 0.5550  
## cognition                        0.0793 .
## conditionencouraging:cognition   0.7845  
## conditionmocking:cognition       0.2381  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnn cndtnm cogntn cndtnn:
## cndtnncrgng -0.627                             
## condtnmckng -0.661  0.427                      
## cognition   -0.008  0.004  0.004               
## cndtnncrgn:  0.004  0.021 -0.002 -0.458        
## cndtnmckng:  0.005 -0.003 -0.006 -0.670  0.307

Relevance social

  • person-level
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: 
## msg_rel_social ~ condition * cognition + (1 | SID) + (1 | message)
##    Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 950.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.3322 -0.1918  0.1055  0.3569  2.4761 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 0.388872 0.62360 
##  message  (Intercept) 0.004771 0.06907 
##  Residual             0.557280 0.74651 
## Number of obs: 369, groups:  SID, 74; message, 15
## 
## Fixed effects:
##                                Estimate Std. Error       df t value
## (Intercept)                     0.12666    0.13052 67.25782   0.970
## conditionencouraging            0.09425    0.20591 68.11781   0.458
## conditionmocking               -0.47731    0.19546 67.90795  -2.442
## cognition                      -0.18509    0.11950 68.15135  -1.549
## conditionencouraging:cognition  0.62812    0.26106 67.95242   2.406
## conditionmocking:cognition      0.28143    0.17826 68.01346   1.579
##                                Pr(>|t|)  
## (Intercept)                      0.3353  
## conditionencouraging             0.6486  
## conditionmocking                 0.0172 *
## cognition                        0.1261  
## conditionencouraging:cognition   0.0189 *
## conditionmocking:cognition       0.1190  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnn cndtnm cogntn cndtnn:
## cndtnncrgng -0.623                             
## condtnmckng -0.656  0.431                      
## cognition   -0.007  0.002  0.002               
## cndtnncrgn:  0.003  0.021 -0.001 -0.458        
## cndtnmckng:  0.005 -0.002 -0.005 -0.670  0.307

intentions

  • person-level
## 
## Call:
## lm(formula = intentions1 ~ condition * cognition, data = data_mod_person)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -3.1470 -0.4534  0.3932  0.7426  1.1121 
## 
## Coefficients:
##                                    Estimate Std. Error t value Pr(>|t|)
## (Intercept)                        -0.07243    0.22955  -0.316    0.753
## conditionmessage control            0.01704    0.29629   0.058    0.954
## conditionmocking                    0.20600    0.31074   0.663    0.510
## cognition                           0.17040    0.33669   0.506    0.614
## conditionmessage control:cognition -0.16363    0.37861  -0.432    0.667
## conditionmocking:cognition         -0.30414    0.38752  -0.785    0.435
## 
## Residual standard error: 1.026 on 68 degrees of freedom
## Multiple R-squared:  0.01936,    Adjusted R-squared:  -0.05275 
## F-statistic: 0.2685 on 5 and 68 DF,  p-value: 0.9288

mediation via norms

mod1 = lm(norm_close ~ condition, data = data_mod_person)

mod2 = lm(DV ~ norm_close + condition, data = data_mod_person)

mediation_mod = mediate(mod1, mod2, sims = 1000, treat = "condition", mediator = "norm_close")

  • Betas are standardized regression coefficients
  • person-level

motivation self

  • message-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME             0.0518      -0.0652         0.22    0.39
## ADE              0.1125      -0.4701         0.69    0.69
## Total Effect     0.1643      -0.4477         0.72    0.58
## Prop. Mediated   0.0784      -2.0272         3.05    0.71
## 
## Sample Size Used: 74 
## 
## 
## Simulations: 1000

motivation other

  • message-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME             0.0468      -0.0879         0.21    0.47
## ADE              0.1394      -0.4625         0.71    0.66
## Total Effect     0.1862      -0.4026         0.74    0.52
## Prop. Mediated   0.0632      -4.6489         2.77    0.75
## 
## Sample Size Used: 74 
## 
## 
## Simulations: 1000

sharing

  • message-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME             0.0342      -0.0816         0.19    0.60
## ADE              0.2082      -0.3464         0.78    0.48
## Total Effect     0.2423      -0.3121         0.80    0.39
## Prop. Mediated   0.0551      -1.8344         2.18    0.73
## 
## Sample Size Used: 74 
## 
## 
## Simulations: 1000

Relevance social

  • person-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME             0.0821      -0.0358         0.28    0.24
## ADE             -0.1858      -0.7233         0.37    0.52
## Total Effect    -0.1037      -0.6506         0.45    0.72
## Prop. Mediated  -0.0744      -4.9620         5.58    0.83
## 
## Sample Size Used: 74 
## 
## 
## Simulations: 1000

intentions

  • person-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                 Estimate 95% CI Lower 95% CI Upper p-value  
## ACME             0.27445     -0.01965         0.63   0.078 .
## ADE             -0.26906     -0.78128         0.21   0.302  
## Total Effect     0.00539     -0.59536         0.59   0.978  
## Prop. Mediated   0.37773    -12.60033        19.44   0.908  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Sample Size Used: 74 
## 
## 
## Simulations: 1000